BUG();
}
- if ( unlikely(shadow_mode(d)) &&
+ if ( unlikely(shadow_mode_enabled(d)) &&
(get_shadow_status(d, page_to_pfn(page)) & PSH_shadowed) )
{
unshadow_table(page_to_pfn(page), type);
cleanup_writable_pagetable(d);
- if ( unlikely(shadow_mode(d)) )
+ if ( unlikely(shadow_mode_enabled(d)) )
check_pagetable(d, ed->arch.guest_table, "pre-mmu"); /* debug */
+    if ( unlikely(shadow_mode_translate(d)) )
+ domain_crash();
+
/*
* If we are resuming after preemption, read how much work we have already
 * done. This allows us to set the @done output parameter correctly.
 */
okay = mod_l1_entry((l1_pgentry_t *)va,
mk_l1_pgentry(req.val));
- if ( unlikely(shadow_mode(d)) && okay &&
+ if ( unlikely(shadow_mode_enabled(d)) && okay &&
(get_shadow_status(d, page-frame_table) &
PSH_shadowed) )
{
mk_l2_pgentry(req.val),
pfn);
- if ( unlikely(shadow_mode(d)) && okay &&
+ if ( unlikely(shadow_mode_enabled(d)) && okay &&
(get_shadow_status(d, page-frame_table) &
PSH_shadowed) )
{
mk_l3_pgentry(req.val),
pfn);
- if ( unlikely(shadow_mode(d)) && okay &&
+ if ( unlikely(shadow_mode_enabled(d)) && okay &&
(get_shadow_status(d, page-frame_table) &
PSH_shadowed) )
{
mk_l4_pgentry(req.val),
pfn);
- if ( unlikely(shadow_mode(d)) && okay &&
+ if ( unlikely(shadow_mode_enabled(d)) && okay &&
(get_shadow_status(d, page-frame_table) &
PSH_shadowed) )
{
/*
 * If in log-dirty mode, mark the corresponding pseudo-physical
* page as dirty.
*/
- if ( unlikely(shadow_mode(d) == SHM_logdirty) &&
+ if ( unlikely(shadow_mode_log_dirty(d)) &&
mark_dirty(d, pfn) )
d->arch.shadow_dirty_block_count++;
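
The use of mark_dirty() as a condition here implies it reports whether the
bit was newly set, so the block counter only counts pages dirtied for the
first time. A minimal sketch of that contract, assuming the bitmap fields
this patch allocates in __shadow_mode_enable() (the helper itself is
illustrative, not Xen's implementation):

    /* Hedged sketch: record pseudo-physical frame 'pfn' as dirty.
     * Returns 1 only when the bit was not already set, matching its
     * use as a condition above. Illustrative only. */
    static inline int sketch_mark_dirty(struct domain *d, unsigned long pfn)
    {
        unsigned long *bmap = d->arch.shadow_dirty_bitmap;
        if ( (bmap == NULL) || (pfn >= d->arch.shadow_dirty_bitmap_size) )
            return 0;                 /* no bitmap, or pfn out of range */
        if ( bmap[pfn / BITS_PER_LONG] & (1UL << (pfn % BITS_PER_LONG)) )
            return 0;                 /* already marked dirty */
        bmap[pfn / BITS_PER_LONG] |= (1UL << (pfn % BITS_PER_LONG));
        return 1;                     /* newly dirtied page */
    }
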
if ( unlikely(pdone != NULL) )
__put_user(done + i, pdone);
- if ( unlikely(shadow_mode(d)) )
+ if ( unlikely(shadow_mode_enabled(d)) )
check_pagetable(d, ed->arch.guest_table, "post-mmu"); /* debug */
UNLOCK_BIGLOCK(d);
if ( unlikely(!__addr_ok(va)) )
return -EINVAL;
+    if ( unlikely(shadow_mode_translate(d)) )
+ domain_crash();
+
LOCK_BIGLOCK(d);
cleanup_writable_pagetable(d);
mk_l1_pgentry(val))) )
err = -EINVAL;
- if ( unlikely(shadow_mode(d)) )
+ if ( unlikely(shadow_mode_enabled(d)) )
{
unsigned long sval = 0;
* the PTE in the PT-holding page. We need the machine frame number
* for this.
*/
- if ( shadow_mode(d) == SHM_logdirty )
+ if ( shadow_mode_log_dirty(d) )
mark_dirty(d, va_to_l1mfn(va));
check_pagetable(d, ed->arch.guest_table, "va"); /* debug */
PTWR_PRINT_WHICH, ptep, pte);
pte &= ~_PAGE_RW;
- if ( unlikely(shadow_mode(d)) )
+ if ( unlikely(shadow_mode_enabled(d)) )
{
/* Write-protect the p.t. page in the shadow page table. */
l1pte_propagate_from_guest(d, &pte, &spte);
/*
 * STEP 3. Reattach the L1 p.t. page into the current address space.
*/
- if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode(d)) )
+ if ( (which == PTWR_PT_ACTIVE) && likely(!shadow_mode_enabled(d)) )
{
pl2e = &linear_l2_table[ptwr_info[cpu].ptinfo[which].l2_idx];
*pl2e = mk_l2_pgentry(l2_pgentry_val(*pl2e) | _PAGE_PRESENT);
/* For safety, disconnect the L1 p.t. page from current space. */
if ( (which == PTWR_PT_ACTIVE) &&
- likely(!shadow_mode(current->domain)) )
+ likely(!shadow_mode_enabled(current->domain)) )
{
*pl2e = mk_l2_pgentry(l2e & ~_PAGE_PRESENT);
#if 1
shadow_audit(d, 1);
+    if ( !d->arch.shadow_ht )
+        return;
+
/* Free each hash chain in turn. */
for ( i = 0; i < shadow_ht_buckets; i++ )
{
/* We clear L2 pages by zeroing the guest entries. */
case PGT_l2_page_table:
p = map_domain_mem((spage - frame_table) << PAGE_SHIFT);
- if ( shadow_mode(d) == SHM_full_32 )
+ if ( shadow_mode_external(d) )
memset(p, 0, L2_PAGETABLE_ENTRIES * sizeof(*p));
else
memset(p, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE * sizeof(*p));
int __shadow_mode_enable(struct domain *d, unsigned int mode)
{
+ d->arch.shadow_mode = mode;
+
if (!d->arch.shadow_ht)
{
d->arch.shadow_ht = xmalloc_array(struct shadow_status, shadow_ht_buckets);
shadow_ht_buckets * sizeof(struct shadow_status));
}
- if ( mode == SHM_logdirty && !d->arch.shadow_dirty_bitmap)
+ if ( shadow_mode_log_dirty(d) && !d->arch.shadow_dirty_bitmap)
{
d->arch.shadow_dirty_bitmap_size = (d->max_pages + 63) & ~63;
d->arch.shadow_dirty_bitmap =
d->arch.shadow_dirty_bitmap_size/8);
}
- d->arch.shadow_mode = mode;
-
return 0;
nomem:
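
For reference, the bitmap sizing above rounds d->max_pages up to a whole
number of 64-bit words before dividing by 8 for the byte count. A worked
example with a made-up page count:

    /* Worked example of the sizing arithmetic (d->max_pages = 1000, say):
     *   bits  = (1000 + 63) & ~63  =  1024    round up to 64-bit words
     *   bytes = bits / 8           =   128    one bit per guest page
     * so a 1000-page domain gets a 128-byte dirty bitmap. */
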
break;
case DOM0_SHADOW_CONTROL_OP_ENABLE_TEST:
- shadow_mode_disable(d);
- rc = __shadow_mode_enable(d, SHM_test);
+ free_shadow_state(d);
+ rc = __shadow_mode_enable(d, SHM_enable);
break;
case DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY:
- shadow_mode_disable(d);
- rc = __shadow_mode_enable(d, SHM_logdirty);
+ free_shadow_state(d);
+        rc = __shadow_mode_enable(d, d->arch.shadow_mode | SHM_log_dirty);
break;
default:
- rc = shadow_mode(d) ? shadow_mode_table_op(d, sc) : -EINVAL;
+ rc = shadow_mode_enabled(d) ? shadow_mode_table_op(d, sc) : -EINVAL;
break;
}
#ifdef __i386__
    /* Install hypervisor and 2x linear p.t. mappings. */
- if ( shadow_mode(d) == SHM_full_32 )
+ if ( shadow_mode_translate(d) )
{
#ifdef CONFIG_VMX
vmx_update_shadow_state(d->exec_domain[0], gpfn, spfn);
mk_l2_pgentry(__pa(page_get_owner(
&frame_table[gpfn])->arch.mm_perdomain_pt) |
__PAGE_HYPERVISOR);
- }
-#endif
- if ( shadow_mode(d) != SHM_full_32 )
unmap_domain_mem(spl2e);
+ }
+#endif
SH_VLOG("shadow_l2_table( %p -> %p)", gpfn, spfn);
return spfn;
L2_PAGETABLE_SHIFT]),
(smfn << PAGE_SHIFT) | __PAGE_HYPERVISOR);
- if ( shadow_mode(d) != SHM_full_32 ) {
+ if ( !shadow_mode_translate(d) ) {
if ( (l2_pgentry_val(spl2e[PERDOMAIN_VIRT_START >> L2_PAGETABLE_SHIFT]) !=
((v2m(page_get_owner(&frame_table[gmfn])->arch.mm_perdomain_pt) |
__PAGE_HYPERVISOR))) )
#define PSH_pfn_mask ((1<<21)-1)
/* Shadow PT operation mode : shadow-mode variable in arch_domain. */
-#define SHM_test (1) /* just run domain on shadow PTs */
-#define SHM_logdirty (2) /* log pages that are dirtied */
-#define SHM_translate (3) /* lookup machine pages in translation table */
-#define SHM_cow (4) /* copy on write all dirtied pages */
-#define SHM_full_32 (8) /* full virtualization for 32-bit */
+
+#define SHM_enable (1<<0) /* we're in one of the shadow modes */
+#define SHM_log_dirty (1<<1) /* enable log dirty mode */
+#define SHM_translate (1<<2) /* do p2m translation on guest tables */
+#define SHM_external (1<<3) /* external page table, not used by Xen */
+
+#define shadow_mode_enabled(_d) ((_d)->arch.shadow_mode)
+#define shadow_mode_log_dirty(_d) ((_d)->arch.shadow_mode & SHM_log_dirty)
+#define shadow_mode_translate(_d) ((_d)->arch.shadow_mode & SHM_translate)
+#define shadow_mode_external(_d) ((_d)->arch.shadow_mode & SHM_external)
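
The old scheme treated shadow_mode as an exclusive enumeration, so a domain
could be in exactly one of SHM_test, SHM_logdirty, SHM_full_32, etc. The new
flags are independent bits, which is what lets the ENABLE_LOGDIRTY handler
OR SHM_log_dirty into whatever mode is already set. A small illustration
using only the macros defined above (the function itself is made up):

    /* Illustration only: the new flags compose, the old enum could not. */
    static void example_flag_composition(struct domain *d)
    {
        d->arch.shadow_mode = SHM_enable | SHM_log_dirty | SHM_translate;

        ASSERT(shadow_mode_enabled(d));    /* any nonzero mode => enabled */
        ASSERT(shadow_mode_log_dirty(d));  /* dirty logging active */
        ASSERT(shadow_mode_translate(d));  /* p2m translation active */
        ASSERT(!shadow_mode_external(d));  /* external tables not in use */
    }
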
#define shadow_linear_pg_table ((l1_pgentry_t *)SH_LINEAR_PT_VIRT_START)
#define shadow_linear_l2_table ((l2_pgentry_t *)(SH_LINEAR_PT_VIRT_START + \
(SH_LINEAR_PT_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
-#define shadow_mode(_d) ((_d)->arch.shadow_mode)
#define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
#define shadow_lock(_d) spin_lock(&(_d)->arch.shadow_lock)
#define shadow_unlock(_d) spin_unlock(&(_d)->arch.shadow_lock)
#endif
#define __mfn_to_gpfn(_d, mfn) \
- ( (shadow_mode(_d) == SHM_full_32) \
+ ( (shadow_mode_translate(_d)) \
? machine_to_phys_mapping[(mfn)] \
: (mfn) )
#define __gpfn_to_mfn(_d, gpfn) \
- ( (shadow_mode(_d) == SHM_full_32) \
+ ( (shadow_mode_translate(_d)) \
? phys_to_machine_mapping(gpfn) \
: (gpfn) )
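
With these macros, frame-number translation is the identity unless the
domain is in translate mode, so callers can apply them unconditionally.
A hedged round-trip sketch (the function is illustrative, not in Xen):

    /* Sketch: machine -> guest-physical -> machine through the macros
     * above; the result equals mfn whenever the p2m and m2p tables are
     * consistent, and trivially for non-translate domains. */
    static unsigned long example_round_trip(struct domain *d, unsigned long mfn)
    {
        unsigned long gpfn = __mfn_to_gpfn(d, mfn);
        return __gpfn_to_mfn(d, gpfn);
    }
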
extern void __shadow_mode_disable(struct domain *d);
static inline void shadow_mode_disable(struct domain *d)
{
- if ( shadow_mode(d) )
+ if ( shadow_mode_enabled(d) )
__shadow_mode_disable(d);
}
struct domain *d, unsigned long gpfn);
static inline void shadow_invalidate(struct exec_domain *ed) {
- if ( shadow_mode(ed->domain) != SHM_full_32 )
+    if ( !shadow_mode_translate(ed->domain) )
BUG();
memset(ed->arch.shadow_vtable, 0, PAGE_SIZE);
}
static inline void __shadow_get_l2e(
struct exec_domain *ed, unsigned long va, unsigned long *sl2e)
{
- if ( shadow_mode(ed->domain) == SHM_full_32 ) {
- *sl2e = l2_pgentry_val(ed->arch.shadow_vtable[l2_table_offset(va)]);
- }
- else if ( shadow_mode(ed->domain) ) {
- *sl2e = l2_pgentry_val(shadow_linear_l2_table[l2_table_offset(va)]);
+ if ( likely(shadow_mode_enabled(ed->domain)) ) {
+ if ( shadow_mode_translate(ed->domain) )
+ *sl2e = l2_pgentry_val(
+ ed->arch.shadow_vtable[l2_table_offset(va)]);
+ else
+ *sl2e = l2_pgentry_val(
+ shadow_linear_l2_table[l2_table_offset(va)]);
}
- else
+ else {
+ BUG(); /* why do we need this case? */
*sl2e = l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
+ }
}
static inline void __shadow_set_l2e(
struct exec_domain *ed, unsigned long va, unsigned long value)
{
- if ( shadow_mode(ed->domain) == SHM_full_32 ) {
- ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
- }
- else if ( shadow_mode(ed->domain) ) {
- shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
+ if ( likely(shadow_mode_enabled(ed->domain)) ) {
+ if ( shadow_mode_translate(ed->domain) )
+ ed->arch.shadow_vtable[l2_table_offset(va)] = mk_l2_pgentry(value);
+ else
+ shadow_linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
}
else
+ {
+ BUG(); /* why do we need this case? */
linear_l2_table[l2_table_offset(va)] = mk_l2_pgentry(value);
+ }
}
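
Both accessors now dispatch identically: translate-mode domains go through
the mapped shadow_vtable, all other shadowed domains through the shadow
linear mapping, and the old non-shadow fallback is kept but flagged with a
BUG() until the author decides whether it is reachable. A caller sketch
(the shape is assumed, not Xen's actual fault path):

    /* Sketch: read the shadow L2e for va and write it back through the
     * same mode-dispatching accessor. Illustrative only. */
    static void example_touch_l2e(struct exec_domain *ed, unsigned long va)
    {
        unsigned long sl2e;

        __shadow_get_l2e(ed, va, &sl2e);       /* vtable or linear map */
        if ( sl2e & _PAGE_PRESENT )
            __shadow_set_l2e(ed, va, sl2e | _PAGE_ACCESSED);
    }
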
static inline void __guest_get_l2e(
struct exec_domain *ed, unsigned long va, unsigned long *l2e)
{
- *l2e = ( shadow_mode(ed->domain) == SHM_full_32) ?
+ *l2e = ( shadow_mode_translate(ed->domain) ) ?
l2_pgentry_val(ed->arch.vpagetable[l2_table_offset(va)]) :
l2_pgentry_val(linear_l2_table[l2_table_offset(va)]);
}
static inline void __guest_set_l2e(
struct exec_domain *ed, unsigned long va, unsigned long value)
{
- if ( shadow_mode(ed->domain) == SHM_full_32 )
+ if ( shadow_mode_translate(ed->domain) )
{
unsigned long pfn;
ASSERT(gpte & _PAGE_RW);
gpte |= _PAGE_DIRTY | _PAGE_ACCESSED;
- if ( shadow_mode(d) == SHM_logdirty )
+ if ( shadow_mode_log_dirty(d) )
__mark_dirty(d, pfn);
spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
gpte |= _PAGE_ACCESSED;
spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
- if ( (shadow_mode(d) == SHM_logdirty) || ! (gpte & _PAGE_DIRTY) )
+ if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
spte &= ~_PAGE_RW;
SH_VVLOG("l1pte_read_fault: updating spte=0x%p gpte=0x%p", spte, gpte);
{
unsigned long gpte = *gpte_p;
unsigned long spte = *spte_p;
- unsigned long host_pfn, host_gpte;
+ unsigned long pfn = gpte >> PAGE_SHIFT;
+ unsigned long mfn = __gpfn_to_mfn(d, pfn);
+
#if SHADOW_VERBOSE_DEBUG
unsigned long old_spte = spte;
#endif
- switch ( shadow_mode(d) )
- {
- case SHM_test:
- spte = 0;
- if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
- (_PAGE_PRESENT|_PAGE_ACCESSED) )
- spte = (gpte & _PAGE_DIRTY) ? gpte : (gpte & ~_PAGE_RW);
- break;
-
- case SHM_logdirty:
- spte = 0;
- if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
- (_PAGE_PRESENT|_PAGE_ACCESSED) )
- spte = gpte & ~_PAGE_RW;
- break;
-
- case SHM_full_32:
- spte = 0;
-
- if ( mmio_space(gpte & 0xFFFFF000) )
- {
- *spte_p = spte;
- return;
- }
+ if ( shadow_mode_external(d) && mmio_space(gpte & 0xFFFFF000) ) {
+ *spte_p = 0;
+ return;
+ }
+
+ spte = 0;
+    if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED)) ==
+         (_PAGE_PRESENT|_PAGE_ACCESSED) ) {
- host_pfn = phys_to_machine_mapping(gpte >> PAGE_SHIFT);
- host_gpte = (host_pfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
-
- if ( (host_gpte & (_PAGE_PRESENT|_PAGE_ACCESSED) ) ==
- (_PAGE_PRESENT|_PAGE_ACCESSED) )
- spte = (host_gpte & _PAGE_DIRTY) ?
- host_gpte : (host_gpte & ~_PAGE_RW);
-
- break;
+ spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
+
+ if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
+ spte &= ~_PAGE_RW;
}
-
+
#if SHADOW_VERBOSE_DEBUG
if ( old_spte || spte || gpte )
SH_VVLOG("l1pte_propagate_from_guest: gpte=0x%p, old spte=0x%p, new spte=0x%p ", gpte, old_spte, spte);
*spte_p = spte;
}
+
+
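
After this rewrite, propagation is a single path for every mode: refuse
MMIO frames for external domains, require PRESENT|ACCESSED, translate pfn
to mfn, and write-protect when log-dirty or the guest PTE is not yet dirty.
That is exactly the union of the old SHM_test, SHM_logdirty and SHM_full_32
cases. A self-contained restatement (illustrative function, not in Xen):

    /* Sketch of the unified rule, using only macros from this patch. */
    static unsigned long example_propagate(struct domain *d, unsigned long gpte)
    {
        unsigned long spte = 0;
        unsigned long mfn  = __gpfn_to_mfn(d, gpte >> PAGE_SHIFT);

        if ( shadow_mode_external(d) && mmio_space(gpte & 0xFFFFF000) )
            return 0;                      /* never shadow MMIO frames */

        if ( (gpte & (_PAGE_PRESENT|_PAGE_ACCESSED)) ==
             (_PAGE_PRESENT|_PAGE_ACCESSED) )
        {
            spte = (mfn << PAGE_SHIFT) | (gpte & ~PAGE_MASK);
            if ( shadow_mode_log_dirty(d) || !(gpte & _PAGE_DIRTY) )
                spte &= ~_PAGE_RW;         /* force a fault on first write */
        }
        return spte;
    }
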
static inline void l2pde_general(
struct domain *d,
unsigned long *gpde_p,
if ( (frame_table[sl1mfn].u.inuse.type_info & PGT_type_mask) ==
PGT_l2_page_table )
{
- if ( shadow_mode(d) != SHM_full_32 )
+ if ( !shadow_mode_translate(d) )
spde = gpde & ~_PAGE_RW;
}
{
unsigned long res;
- ASSERT(shadow_mode(d));
+ ASSERT(shadow_mode_enabled(d));
/*
* If we get here we know that some sort of update has happened to the
* has changed type. If we're in log dirty mode, we should set the
* appropriate bit in the dirty bitmap.
* N.B. The VA update path doesn't use this and is handled independently.
+
+ XXX need to think this through for vmx guests, but probably OK
*/
shadow_lock(d);
- if ( shadow_mode(d) == SHM_logdirty )
+ if ( shadow_mode_log_dirty(d) )
__mark_dirty(d, gpfn);
if ( !(res = __shadow_status(d, gpfn)) )
smfn = shadow_l2_table(d, gpfn);
#ifdef CONFIG_VMX
else
- if (d->arch.shadow_mode == SHM_full_32)
+    if ( shadow_mode_translate(ed->domain) )
{
vmx_update_shadow_state(ed, gpfn, smfn);
}
ed->arch.shadow_table = mk_pagetable(smfn<<PAGE_SHIFT);
- if (d->arch.shadow_mode != SHM_full_32)
+ if ( !shadow_mode_external(ed->domain) )
ed->arch.monitor_table = ed->arch.shadow_table;
}
static inline void update_pagetables(struct exec_domain *ed)
{
- if ( unlikely(shadow_mode(ed->domain)) )
+ if ( unlikely(shadow_mode_enabled(ed->domain)) )
{
SH_VVLOG("update_pagetables( gptbase=%p, mode=%d )",
pagetable_val(ed->arch.guest_table),